#include <xeno/if_packet.h>
#include <xeno/sched.h>
#include <xeno/interrupt.h>
-
+#include <xeno/skbuff.h>
#include <asm/atomic.h>
#include <asm/cache.h>
#include <asm/byteorder.h>
struct vlan_group;
extern struct skb_completion_queues {
- struct sk_buff *rx; /* Packets received in interrupt context. */
- unsigned int rx_qlen;
- struct sk_buff *tx; /* Tx buffers defunct in interrupt context. */
+ struct sk_buff_head rx; /* Packets received in interrupt context. */
+ struct sk_buff *tx; /* Tx buffers defunct in interrupt context. */
} skb_queue[NR_CPUS] __cacheline_aligned;
/* Backlog congestion levels */
#include <asm/system.h>
#include <asm/atomic.h>
#include <asm/types.h>
-#include <linux/spinlock.h>
#include <linux/mm.h>
#include <xeno/vif.h>
/* These two members must be first. */
struct sk_buff * next;
struct sk_buff * prev;
-
__u32 qlen;
- spinlock_t lock;
};
#define MAX_SKB_FRAGS 1 /* KAF: was 6 */
static inline void skb_queue_head_init(struct sk_buff_head *list)
{
- spin_lock_init(&list->lock);
list->prev = (struct sk_buff *)list;
list->next = (struct sk_buff *)list;
list->qlen = 0;
* @list: list to use
* @newsk: buffer to queue
*
- * Queue a buffer at the start of a list. This function takes no locks
- * and you must therefore hold required locks before calling it.
- *
* A buffer cannot be placed on two lists at the same time.
*/
* @list: list to use
* @newsk: buffer to queue
*
- * Queue a buffer at the end of a list. This function takes no locks
- * and you must therefore hold required locks before calling it.
- *
* A buffer cannot be placed on two lists at the same time.
*/
unsigned long flags;
local_irq_save(flags);
-
- if ( unlikely(skb_queue[cpu].rx_qlen > 100) )
+ if ( unlikely(skb_queue_len(&skb_queue[cpu].rx) > 100) )
{
local_irq_restore(flags);
perfc_incr(net_rx_congestion_drop);
return NET_RX_DROP;
}
-
- skb->next = skb_queue[cpu].rx;
- skb_queue[cpu].rx = skb;
- skb_queue[cpu].rx_qlen++;
-
+ __skb_queue_tail(&skb_queue[cpu].rx, skb);
local_irq_restore(flags);
__cpu_raise_softirq(cpu, NET_RX_SOFTIRQ);
static void net_rx_action(struct softirq_action *h)
{
int offset, cpu = smp_processor_id();
- struct sk_buff *skb, *nskb;
+ struct sk_buff_head list, *q = &skb_queue[cpu].rx;
+ struct sk_buff *skb;
local_irq_disable();
- skb = skb_queue[cpu].rx;
- skb_queue[cpu].rx = NULL;
- skb_queue[cpu].rx_qlen = 0;
+ /* The code below that patches skbs onto the private list header is invalid if the list is empty! */
+ if ( unlikely(skb_queue_len(q) == 0) )
+ {
+ local_irq_enable();
+ return;
+ }
+ /* Patch the head and tail skbuffs to point at the private list header. */
+ q->next->prev = (struct sk_buff *)&list;
+ q->prev->next = (struct sk_buff *)&list;
+ /* Move the list to our private header. The public header is reinit'ed. */
+ list = *q;
+ skb_queue_head_init(q);
local_irq_enable();
- while ( skb != NULL )
+ while ( (skb = __skb_dequeue(&list)) != NULL )
{
ASSERT(skb->skb_type == SKB_ZERO_COPY);
unmap_domain_mem(skb->head);
- nskb = skb->next;
kfree_skb(skb);
- skb = nskb;
}
}
int setup_network_devices(void)
{
- int ret;
+ int i, ret;
extern char opt_ifname[];
memset(skb_queue, 0, sizeof(skb_queue));
+ for ( i = 0; i < smp_num_cpus; i++ )
+ skb_queue_head_init(&skb_queue[i].rx);
/* Actual receive processing happens in softirq context. */
open_softirq(NET_RX_SOFTIRQ, net_rx_action, NULL);